bitkeeper revision 1.983 (40d46e62pNngJp16CZ2sqZwmplr_Kw)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Sat, 19 Jun 2004 16:48:34 +0000 (16:48 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Sat, 19 Jun 2004 16:48:34 +0000 (16:48 +0000)
More cleanups.

29 files changed:
.rootkeys
xen/arch/x86/apic.c
xen/arch/x86/nmi.c
xen/arch/x86/pci-irq.c
xen/arch/x86/process.c
xen/arch/x86/setup.c
xen/arch/x86/smp.c
xen/arch/x86/smpboot.c
xen/arch/x86/time.c
xen/arch/x86/traps.c
xen/common/ac_timer.c
xen/common/debug.c
xen/common/dom0_ops.c
xen/common/domain.c
xen/common/event_channel.c
xen/common/kernel.c
xen/common/keyhandler.c
xen/common/memory.c
xen/common/sched_bvt.c
xen/common/schedule.c
xen/common/shadow.c
xen/common/softirq.c
xen/drivers/char/serial.c
xen/include/xen/event.h
xen/include/xen/interrupt.h [deleted file]
xen/include/xen/irq.h
xen/include/xen/sched.h
xen/include/xen/smp.h
xen/include/xen/softirq.h [new file with mode: 0644]

index 287fc7a3c352d2566a8bbe45cfad659fee7f59b5..8d43de45d10e98d0a86d75fefcb0881502ee8bf4 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3ddb79c0HIghfBF8zFUdmXhOU8i6hA xen/include/xen/errno.h
 3ddb79c1W0lQca8gRV7sN6j3iY4Luw xen/include/xen/event.h
 3ddb79c0GurNF9tDWqQbAwJFH8ugfA xen/include/xen/init.h
-3ddb79c1Vi5VleJAOKHAlY0G2zAsgw xen/include/xen/interrupt.h
 3ddb79c1nzaWu8NoF4xCCMSFJR4MlA xen/include/xen/ioport.h
 3ddb79c2qAxCOABlkKtD8Txohe-qEw xen/include/xen/irq.h
 3ddb79c2b3qe-6Ann09FqZBF4IrJaQ xen/include/xen/irq_cpustat.h
 405b8599BsDsDwKEJLS0XipaiQW3TA xen/include/xen/shadow.h
 3ddb79c14dXIhP7C2ahnoD08K90G_w xen/include/xen/slab.h
 3ddb79c09xbS-xxfKxuV3JETIhBzmg xen/include/xen/smp.h
+3ddb79c1Vi5VleJAOKHAlY0G2zAsgw xen/include/xen/softirq.h
 3ddb79c2iIcESrDAB8samy_yAh6olQ xen/include/xen/spinlock.h
 3e7f358aMtFMUVvN_Zjg5qvEJIqEBA xen/include/xen/string.h
 3ddb79c0BnA20PbgmuMPSGIBljNRQw xen/include/xen/time.h
index b74e358e6acf808335d08164431d1630b0620b24..e36f7ff44b24250b7b7e04e55fa1954cea540f4b 100644 (file)
 
 
 #include <xen/config.h>
+#include <xen/ac_timer.h>
+#include <xen/perfc.h>
+#include <xen/errno.h>
 #include <xen/init.h>
+#include <xen/mm.h>
 #include <xen/sched.h>
 #include <xen/irq.h>
 #include <xen/delay.h>
+#include <xen/smp.h>
+#include <xen/softirq.h>
 #include <asm/mc146818rtc.h>
 #include <asm/msr.h>
-#include <xen/errno.h>
 #include <asm/atomic.h>
-#include <xen/smp.h>
-#include <xen/interrupt.h>
 #include <asm/mpspec.h>
 #include <asm/flushtlb.h>
 #include <asm/hardirq.h>
 #include <asm/apic.h>
-#include <xen/mm.h>
 #include <asm/io_apic.h>
-#include <xen/ac_timer.h>
-#include <xen/perfc.h>
 
 
 /* Using APIC to generate smp_local_timer_interrupt? */
@@ -726,14 +726,12 @@ unsigned int apic_timer_irqs [NR_CPUS];
 
 void smp_apic_timer_interrupt(struct pt_regs * regs)
 {
-    int cpu = smp_processor_id();
-
     ack_APIC_irq();
 
-    apic_timer_irqs[cpu]++;
+    apic_timer_irqs[smp_processor_id()]++;
     perfc_incrc(apic_timer);
 
-    __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
+    raise_softirq(AC_TIMER_SOFTIRQ);
 }
 
 /*
index 3123ee2f139c350c777f685e0bce02a62a93320c..af287444ebbf94e5d3b6cf9547b2257873a2ac66 100644 (file)
 #include <xen/mm.h>
 #include <xen/irq.h>
 #include <xen/delay.h>
-#include <xen/interrupt.h>
 #include <xen/time.h>
 #include <xen/sched.h>
-
 #include <asm/mc146818rtc.h>
 #include <asm/smp.h>
 #include <asm/msr.h>
index 8821fe74aa230e420ad7d301c04b9e625a18d1ca..cf19e9e9a149f32c5cfa233bb27220cff37fab0c 100644 (file)
 #include <xen/pci.h>
 #include <xen/init.h>
 #include <xen/slab.h>
-#include <xen/interrupt.h>
 #include <xen/irq.h>
-
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/io_apic.h>
-
 #include "pci-x86.h"
 
 #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
index ab48d85765368c8d1a0be825575f367bf2df39f0..dbc7eb2b7a568a83a87450b9554cb58b8532d07e 100644 (file)
 #include <xen/errno.h>
 #include <xen/sched.h>
 #include <xen/smp.h>
-#include <asm/ptrace.h>
 #include <xen/delay.h>
-#include <xen/interrupt.h>
+#include <xen/softirq.h>
+#include <asm/ptrace.h>
 #include <asm/mc146818rtc.h>
-
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -77,7 +76,7 @@ void startup_cpu_idle_loop(void)
 {
     /* Just some sanity to ensure that the scheduler is set up okay. */
     ASSERT(current->domain == IDLE_DOMAIN_ID);
-    domain_controller_unpause(current);
+    domain_start(current);
     __enter_scheduler();
 
     /*
index b9982f31e6417019bb25df89981d089387b70712..2e450497d46ee618c1e580d0e1d49cc3ef528425 100644 (file)
@@ -1,11 +1,11 @@
 
 #include <xen/config.h>
 #include <xen/init.h>
-#include <xen/interrupt.h>
 #include <xen/lib.h>
 #include <xen/sched.h>
 #include <xen/pci.h>
 #include <xen/serial.h>
+#include <xen/softirq.h>
 #include <xen/acpi.h>
 #include <asm/bitops.h>
 #include <asm/smp.h>
index 607c0839465b211918b5c1706bd18ff73cfd9334..b9a0fec0ad0b8b3183225e61caf5907f0e473234 100644 (file)
@@ -305,7 +305,9 @@ void flush_tlb_all_pge(void)
 
 void smp_send_event_check_mask(unsigned long cpu_mask)
 {
-    send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
+    cpu_mask &= ~(1<<smp_processor_id());
+    if ( cpu_mask != 0 )
+        send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
 }
 
 /*
index d03fa9540fc0e17c7d3d198b7497a1a850a56a65..ff8bcb7faab345cd8279495ed94de6e7cb15bc63 100644 (file)
@@ -34,7 +34,6 @@
 
 #include <xen/config.h>
 #include <xen/init.h>
-#include <xen/interrupt.h>
 #include <xen/irq.h>
 #include <xen/mm.h>
 #include <xen/slab.h>
index 4cfc992b82c1e005a9baa0ab9c669630be958494..dec7618ed7537a983dddf5d5b132b30d1098f23c 100644 (file)
 #include <xen/lib.h>
 #include <xen/config.h>
 #include <xen/init.h>
-#include <xen/interrupt.h>
 #include <xen/time.h>
 #include <xen/ac_timer.h>
-
-#include <asm/io.h>
 #include <xen/smp.h>
 #include <xen/irq.h>
+#include <xen/softirq.h>
+#include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/mpspec.h>
 #include <asm/processor.h>
@@ -93,7 +92,7 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
     /* Rough hack to allow accurate timers to sort-of-work with no APIC. */
     if ( do_timer_lists_from_pit )
-        __cpu_raise_softirq(smp_processor_id(), AC_TIMER_SOFTIRQ);
+        raise_softirq(AC_TIMER_SOFTIRQ);
 }
 
 static struct irqaction irq0 = { timer_interrupt, "timer", NULL};
index 5a28b087eecb8940d1c4d76bbf67749d68e571da..7a39277301bd9bc1170b62f188e44f82bf7984f5 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <xen/config.h>
 #include <xen/init.h>
-#include <xen/interrupt.h>
 #include <xen/sched.h>
 #include <xen/lib.h>
 #include <xen/errno.h>
index 09a8fff4a5550bad97abed7fa176805591215a26..73fa60d5af8cccf8d644eab3e6cae943071841b3 100644 (file)
@@ -21,7 +21,7 @@
 #include <xen/smp.h>
 #include <xen/perfc.h>
 #include <xen/time.h>
-#include <xen/interrupt.h>
+#include <xen/softirq.h>
 #include <xen/ac_timer.h>
 #include <xen/keyhandler.h>
 #include <asm/system.h>
@@ -154,84 +154,58 @@ static int add_entry(struct ac_timer **heap, struct ac_timer *t)
  * TIMER OPERATIONS.
  */
 
-static inline unsigned long __add_ac_timer(struct ac_timer *timer)
+static inline void __add_ac_timer(struct ac_timer *timer)
 {
     int cpu = timer->cpu;
-    unsigned long cpu_mask = 0;
-
     if ( add_entry(ac_timers[cpu].heap, timer) )
-    {
-        __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
-        cpu_mask = (cpu != smp_processor_id()) ? 1<<cpu : 0;
-    }
-
-    return cpu_mask;
+        cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
 }
 
 void add_ac_timer(struct ac_timer *timer) 
 {
     int           cpu = timer->cpu;
-    unsigned long flags, cpu_mask;
+    unsigned long flags;
 
     spin_lock_irqsave(&ac_timers[cpu].lock, flags);
     ASSERT(timer != NULL);
     ASSERT(!active_ac_timer(timer));
-    cpu_mask = __add_ac_timer(timer);
+    __add_ac_timer(timer);
     spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
-
-    if ( cpu_mask ) 
-        smp_send_event_check_mask(cpu_mask);
 }
 
 
-static inline unsigned long __rem_ac_timer(struct ac_timer *timer)
+static inline void __rem_ac_timer(struct ac_timer *timer)
 {
     int cpu = timer->cpu;
-    unsigned long cpu_mask = 0;
-
     if ( remove_entry(ac_timers[cpu].heap, timer) )
-    {
-        __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
-        cpu_mask = (cpu != smp_processor_id()) ? 1<<cpu : 0;
-    }
-
-    return cpu_mask;
+        cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
 }
 
 void rem_ac_timer(struct ac_timer *timer)
 {
     int           cpu = timer->cpu;
-    unsigned long flags, cpu_mask = 0;
+    unsigned long flags;
 
     spin_lock_irqsave(&ac_timers[cpu].lock, flags);
     ASSERT(timer != NULL);
     if ( active_ac_timer(timer) )
-        cpu_mask = __rem_ac_timer(timer);
+        __rem_ac_timer(timer);
     spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
-
-    if ( cpu_mask ) 
-        smp_send_event_check_mask(cpu_mask);
 }
 
 
 void mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
 {
     int           cpu = timer->cpu;
-    unsigned long flags, cpu_mask = 0;
+    unsigned long flags;
 
     spin_lock_irqsave(&ac_timers[cpu].lock, flags);
-
     ASSERT(timer != NULL);
-
     if ( active_ac_timer(timer) )
-        cpu_mask = __rem_ac_timer(timer);
+        __rem_ac_timer(timer);
     timer->expires = new_time;
-    cpu_mask |= __add_ac_timer(timer);
-
+    __add_ac_timer(timer);
     spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
-
-    if ( cpu_mask ) 
-        smp_send_event_check_mask(cpu_mask);
 }
 
 
index f4eeaaee5ef874a5d01ccfdc2654d989983cb8a3..ce911bd11f37f6ad36c9901e86deff8ddf4501e3 100644 (file)
@@ -49,11 +49,11 @@ void pdb_do_debug (dom0_op_t *op)
     {
         case 'c' :
        {
-           struct domain *p = find_domain_by_id(op->u.debug.domain);
-           if ( p != NULL )
+           struct domain *d = find_domain_by_id(op->u.debug.domain);
+           if ( d != NULL )
            {
-                domain_controller_unpause(p);
-               put_domain(p);
+                domain_start(d);
+               put_domain(d);
            }
            else
            {
@@ -66,13 +66,13 @@ void pdb_do_debug (dom0_op_t *op)
             int loop;
             u_char x;
            unsigned long cr3;
-           struct domain *p;
+           struct domain *d;
 
-           p = find_domain_by_id(op->u.debug.domain);
-           if (p->mm.shadow_mode)
-             cr3 = pagetable_val(p->mm.shadow_table);
+           d = find_domain_by_id(op->u.debug.domain);
+           if ( d->mm.shadow_mode )
+             cr3 = pagetable_val(d->mm.shadow_table);
            else
-             cr3 = pagetable_val(p->mm.pagetable);
+             cr3 = pagetable_val(d->mm.pagetable);
 
             for (loop = 0; loop < op->u.debug.in2; loop++)         /* length */
             { 
@@ -85,17 +85,17 @@ void pdb_do_debug (dom0_op_t *op)
                 printk (" %02x", x);
             }
             printk ("\n");
-           put_domain(p);
+           put_domain(d);
             break;
         }
         case 's' :
        {
-           struct domain * p = find_domain_by_id(op->u.debug.domain);
+           struct domain *d = find_domain_by_id(op->u.debug.domain);
 
-           if (p != NULL)
+           if ( d != NULL )
            {
-                domain_controller_pause(p);
-               put_domain(p);
+                domain_stop(d);
+               put_domain(d);
            }
            else
            {
@@ -109,5 +109,4 @@ void pdb_do_debug (dom0_op_t *op)
                   op->u.debug.opcode, op->u.debug.opcode);
        }
     }
-    return;
 }
index fd9eb2b2d66acceb15a64ec82e731aba21b6731c..5128b150700cb47e95f525787db11f2ec4a038b1 100644 (file)
@@ -89,7 +89,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
             ret = -EINVAL;
             if ( test_bit(DF_CONSTRUCTED, &d->flags) )
             {
-                domain_controller_unpause(d);
+                domain_start(d);
                 ret = 0;
             }
             put_domain(d);
@@ -103,7 +103,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
         ret = -ESRCH;
         if ( d != NULL )
         {
-            domain_controller_pause(d);
+            domain_stop(d);
             put_domain(d);
             ret = 0;
         }
@@ -196,14 +196,14 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
             {
                 if ( cpu == -1 )
                 {
-                    p->cpupinned = 0;
+                    clear_bit(DF_CPUPINNED, &p->flags);
                 }
                 else
                 {
                     domain_pause(p);
+                    set_bit(DF_CPUPINNED, &p->flags);
                     cpu = cpu % smp_num_cpus;
                     p->processor = cpu;
-                    p->cpupinned = 1;                    
                     domain_unpause(p);
                 }
                 put_domain(p);
@@ -295,14 +295,18 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
                 op->u.getdomaininfo.flags = DOMSTATE_CRASHED;
             else if ( test_bit(DF_SUSPENDED, &p->flags) )
                 op->u.getdomaininfo.flags = DOMSTATE_SUSPENDED;
-            else if ( test_bit(DF_CONTROLPAUSE, &p->flags) )
+            else if ( test_bit(DF_STOPPED, &p->flags) )
                 op->u.getdomaininfo.flags = DOMSTATE_PAUSED;
             else if ( test_bit(DF_BLOCKED, &p->flags) )
                 op->u.getdomaininfo.flags = DOMSTATE_BLOCKED;
+            else if ( test_bit(DF_RUNNING, &p->flags) )
+            {
+                op->u.getdomaininfo.flags = DOMSTATE_RUNNING;
+                dump_state = 1;
+            }
             else
             {
-                op->u.getdomaininfo.flags = 
-                    p->has_cpu ? DOMSTATE_RUNNING : DOMSTATE_RUNNABLE;
+                op->u.getdomaininfo.flags = DOMSTATE_RUNNABLE;
                 dump_state = 1;
             }
 
index fe3622ab294666ca701def4df840d6cdace6e483..37f866113e3f5c4d00c7d96f72849067119c47c0 100644 (file)
@@ -5,19 +5,18 @@
 #include <xen/errno.h>
 #include <xen/sched.h>
 #include <xen/mm.h>
-#include <xen/interrupt.h>
 #include <xen/delay.h>
 #include <xen/event.h>
 #include <xen/time.h>
 #include <xen/shadow.h>
-#include <hypervisor-ifs/dom0_ops.h>
+#include <xen/console.h>
+#include <xen/shadow.h>
 #include <asm/io.h>
 #include <asm/domain_page.h>
 #include <asm/flushtlb.h>
 #include <asm/msr.h>
-#include <xen/console.h>
 #include <asm/i387.h>
-#include <xen/shadow.h>
+#include <hypervisor-ifs/dom0_ops.h>
 
 #if defined(__x86_64__)
 #define ELFSIZE 64
@@ -185,7 +184,7 @@ void domain_crash(void)
     struct domain *d;
 
     set_bit(DF_CRASHED, &current->flags);
-    
+
     d = find_domain_by_id(0);
     send_guest_virq(d, VIRQ_DOM_EXC);
     put_domain(d);
index bd529323bb6f13774bff9604699d5c2e00c24efa..ee81bd7eed4a73ed628371f485c97da4870c358d 100644 (file)
 #define INIT_EVENT_CHANNELS   16
 #define MAX_EVENT_CHANNELS  1024
 
-static int get_free_port(struct domain *p)
+static int get_free_port(struct domain *d)
 {
     int max, port;
     event_channel_t *chn;
 
-    max = p->max_event_channel;
-    chn = p->event_channel;
+    max = d->max_event_channel;
+    chn = d->event_channel;
 
     for ( port = 0; port < max; port++ )
         if ( chn[port].state == ECS_FREE )
@@ -54,14 +54,14 @@ static int get_free_port(struct domain *p)
 
         memset(chn, 0, max * sizeof(event_channel_t));
 
-        if ( p->event_channel != NULL )
+        if ( d->event_channel != NULL )
         {
-            memcpy(chn, p->event_channel, (max/2) * sizeof(event_channel_t));
-            kfree(p->event_channel);
+            memcpy(chn, d->event_channel, (max/2) * sizeof(event_channel_t));
+            kfree(d->event_channel);
         }
 
-        p->event_channel     = chn;
-        p->max_event_channel = max;
+        d->event_channel     = chn;
+        d->max_event_channel = max;
     }
 
     return port;
@@ -69,10 +69,10 @@ static int get_free_port(struct domain *p)
 
 static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
 {
-    struct domain *p1, *p2;
-    int                 port1 = 0, port2 = 0;
-    domid_t             dom1 = bind->dom1, dom2 = bind->dom2;
-    long                rc = 0;
+    struct domain *d1, *d2;
+    int            port1 = 0, port2 = 0;
+    domid_t        dom1 = bind->dom1, dom2 = bind->dom2;
+    long           rc = 0;
 
     if ( !IS_PRIV(current) )
         return -EPERM;
@@ -82,60 +82,60 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
     if ( dom2 == DOMID_SELF )
         dom2 = current->domain;
 
-    if ( ((p1 = find_domain_by_id(dom1)) == NULL) ||
-         ((p2 = find_domain_by_id(dom2)) == NULL) )
+    if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
+         ((d2 = find_domain_by_id(dom2)) == NULL) )
     {
-        if ( p1 != NULL )
-            put_domain(p1);
+        if ( d1 != NULL )
+            put_domain(d1);
         return -ESRCH;
     }
 
     /* Avoid deadlock by first acquiring lock of domain with smaller id. */
     if ( dom1 < dom2 )
     {
-        spin_lock(&p1->event_channel_lock);
-        spin_lock(&p2->event_channel_lock);
+        spin_lock(&d1->event_channel_lock);
+        spin_lock(&d2->event_channel_lock);
     }
     else
     {
-        if ( p1 != p2 )
-            spin_lock(&p2->event_channel_lock);
-        spin_lock(&p1->event_channel_lock);
+        if ( d1 != d2 )
+            spin_lock(&d2->event_channel_lock);
+        spin_lock(&d1->event_channel_lock);
     }
 
-    if ( (port1 = get_free_port(p1)) < 0 )
+    if ( (port1 = get_free_port(d1)) < 0 )
     {
         rc = port1;
         goto out;
     }
 
     /* 'Allocate' port1 before searching for a free port2. */
-    p1->event_channel[port1].state = ECS_INTERDOMAIN;
+    d1->event_channel[port1].state = ECS_INTERDOMAIN;
 
-    if ( (port2 = get_free_port(p2)) < 0 )
+    if ( (port2 = get_free_port(d2)) < 0 )
     {
-        p1->event_channel[port1].state = ECS_FREE;
+        d1->event_channel[port1].state = ECS_FREE;
         rc = port2;
         goto out;
     }
 
-    p1->event_channel[port1].u.remote.dom  = p2;
-    p1->event_channel[port1].u.remote.port = (u16)port2;
+    d1->event_channel[port1].u.remote.dom  = d2;
+    d1->event_channel[port1].u.remote.port = (u16)port2;
 
-    p2->event_channel[port2].u.remote.dom  = p1;
-    p2->event_channel[port2].u.remote.port = (u16)port1;
-    p2->event_channel[port2].state         = ECS_INTERDOMAIN;
+    d2->event_channel[port2].u.remote.dom  = d1;
+    d2->event_channel[port2].u.remote.port = (u16)port1;
+    d2->event_channel[port2].state         = ECS_INTERDOMAIN;
 
-    evtchn_set_pending(p1, port1);
-    evtchn_set_pending(p2, port2);
+    evtchn_set_pending(d1, port1);
+    evtchn_set_pending(d2, port2);
     
  out:
-    spin_unlock(&p1->event_channel_lock);
-    if ( p1 != p2 )
-        spin_unlock(&p2->event_channel_lock);
+    spin_unlock(&d1->event_channel_lock);
+    if ( d1 != d2 )
+        spin_unlock(&d2->event_channel_lock);
     
-    put_domain(p1);
-    put_domain(p2);
+    put_domain(d1);
+    put_domain(d2);
 
     bind->port1 = port1;
     bind->port2 = port2;
@@ -146,32 +146,31 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
 
 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
 {
-    struct domain *p = current;
-    int virq = bind->virq;
-    int port;
+    struct domain *d = current;
+    int            port, virq = bind->virq;
 
-    if ( virq >= ARRAY_SIZE(p->virq_to_evtchn) )
+    if ( virq >= ARRAY_SIZE(d->virq_to_evtchn) )
         return -EINVAL;
 
-    spin_lock(&p->event_channel_lock);
+    spin_lock(&d->event_channel_lock);
 
     /*
      * Port 0 is the fallback port for VIRQs that haven't been explicitly
      * bound yet. The exception is the 'misdirect VIRQ', which is permanently 
      * bound to port 0.
      */
-    if ( ((port = p->virq_to_evtchn[virq]) != 0) ||
+    if ( ((port = d->virq_to_evtchn[virq]) != 0) ||
          (virq == VIRQ_MISDIRECT) ||
-         ((port = get_free_port(p)) < 0) )
+         ((port = get_free_port(d)) < 0) )
         goto out;
 
-    p->event_channel[port].state  = ECS_VIRQ;
-    p->event_channel[port].u.virq = virq;
+    d->event_channel[port].state  = ECS_VIRQ;
+    d->event_channel[port].u.virq = virq;
 
-    p->virq_to_evtchn[virq] = port;
+    d->virq_to_evtchn[virq] = port;
 
  out:
-    spin_unlock(&p->event_channel_lock);
+    spin_unlock(&d->event_channel_lock);
 
     if ( port < 0 )
         return port;
@@ -183,34 +182,33 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
 
 static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
 {
-    struct domain *p = current;
-    int pirq = bind->pirq;
-    int port, rc;
+    struct domain *d = current;
+    int            port, rc, pirq = bind->pirq;
 
-    if ( pirq >= ARRAY_SIZE(p->pirq_to_evtchn) )
+    if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
         return -EINVAL;
 
-    spin_lock(&p->event_channel_lock);
+    spin_lock(&d->event_channel_lock);
 
-    if ( ((rc = port = p->pirq_to_evtchn[pirq]) != 0) ||
-         ((rc = port = get_free_port(p)) < 0) )
+    if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
+         ((rc = port = get_free_port(d)) < 0) )
         goto out;
 
-    p->pirq_to_evtchn[pirq] = port;
-    rc = pirq_guest_bind(p, pirq, 
+    d->pirq_to_evtchn[pirq] = port;
+    rc = pirq_guest_bind(d, pirq, 
                          !!(bind->flags & BIND_PIRQ__WILL_SHARE));
     if ( rc != 0 )
     {
-        p->pirq_to_evtchn[pirq] = 0;
+        d->pirq_to_evtchn[pirq] = 0;
         DPRINTK("Couldn't bind to PIRQ %d (error=%d)\n", pirq, rc);
         goto out;
     }
 
-    p->event_channel[port].state  = ECS_PIRQ;
-    p->event_channel[port].u.pirq = pirq;
+    d->event_channel[port].state  = ECS_PIRQ;
+    d->event_channel[port].u.pirq = pirq;
 
  out:
-    spin_unlock(&p->event_channel_lock);
+    spin_unlock(&d->event_channel_lock);
 
     if ( rc < 0 )
         return rc;
@@ -220,20 +218,20 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
 }
 
 
-static long __evtchn_close(struct domain *p1, int port1)
+static long __evtchn_close(struct domain *d1, int port1)
 {
-    struct domain *p2 = NULL;
-    event_channel_t    *chn1, *chn2;
-    int                 port2;
-    long                rc = 0;
+    struct domain   *d2 = NULL;
+    event_channel_t *chn1, *chn2;
+    int              port2;
+    long             rc = 0;
 
  again:
-    spin_lock(&p1->event_channel_lock);
+    spin_lock(&d1->event_channel_lock);
 
-    chn1 = p1->event_channel;
+    chn1 = d1->event_channel;
 
     /* NB. Port 0 is special (VIRQ_MISDIRECT). Never let it be closed. */
-    if ( (port1 <= 0) || (port1 >= p1->max_event_channel) )
+    if ( (port1 <= 0) || (port1 >= d1->max_event_channel) )
     {
         rc = -EINVAL;
         goto out;
@@ -249,59 +247,59 @@ static long __evtchn_close(struct domain *p1, int port1)
         break;
 
     case ECS_PIRQ:
-        if ( (rc = pirq_guest_unbind(p1, chn1[port1].u.pirq)) == 0 )
-            p1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
+        if ( (rc = pirq_guest_unbind(d1, chn1[port1].u.pirq)) == 0 )
+            d1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
         break;
 
     case ECS_VIRQ:
-        p1->virq_to_evtchn[chn1[port1].u.virq] = 0;
+        d1->virq_to_evtchn[chn1[port1].u.virq] = 0;
         break;
 
     case ECS_INTERDOMAIN:
-        if ( p2 == NULL )
+        if ( d2 == NULL )
         {
-            p2 = chn1[port1].u.remote.dom;
+            d2 = chn1[port1].u.remote.dom;
 
-            /* If we unlock p1 then we could lose p2. Must get a reference. */
-            if ( unlikely(!get_domain(p2)) )
+            /* If we unlock d1 then we could lose d2. Must get a reference. */
+            if ( unlikely(!get_domain(d2)) )
             {
                 /*
-                 * Failed to obtain a reference. No matter: p2 must be dying
+                 * Failed to obtain a reference. No matter: d2 must be dying
                  * and so will close this event channel for us.
                  */
-                p2 = NULL;
+                d2 = NULL;
                 goto out;
             }
 
-            if ( p1->domain < p2->domain )
+            if ( d1->domain < d2->domain )
             {
-                spin_lock(&p2->event_channel_lock);
+                spin_lock(&d2->event_channel_lock);
             }
-            else if ( p1 != p2 )
+            else if ( d1 != d2 )
             {
-                spin_unlock(&p1->event_channel_lock);
-                spin_lock(&p2->event_channel_lock);
+                spin_unlock(&d1->event_channel_lock);
+                spin_lock(&d2->event_channel_lock);
                 goto again;
             }
         }
-        else if ( p2 != chn1[port1].u.remote.dom )
+        else if ( d2 != chn1[port1].u.remote.dom )
         {
             rc = -EINVAL;
             goto out;
         }
     
-        chn2  = p2->event_channel;
+        chn2  = d2->event_channel;
         port2 = chn1[port1].u.remote.port;
 
-        if ( port2 >= p2->max_event_channel )
+        if ( port2 >= d2->max_event_channel )
             BUG();
         if ( chn2[port2].state != ECS_INTERDOMAIN )
             BUG();
-        if ( chn2[port2].u.remote.dom != p1 )
+        if ( chn2[port2].u.remote.dom != d1 )
             BUG();
 
         chn2[port2].state = ECS_UNBOUND;
-        evtchn_set_exception(p2, port2);
+        evtchn_set_exception(d2, port2);
 
         break;
 
@@ -310,17 +308,17 @@ static long __evtchn_close(struct domain *p1, int port1)
     }
 
     chn1[port1].state = ECS_FREE;
-    evtchn_set_exception(p1, port1);
+    evtchn_set_exception(d1, port1);
 
  out:
-    if ( p2 != NULL )
+    if ( d2 != NULL )
     {
-        if ( p1 != p2 )
-            spin_unlock(&p2->event_channel_lock);
-        put_domain(p2);
+        if ( d1 != d2 )
+            spin_unlock(&d2->event_channel_lock);
+        put_domain(d2);
     }
     
-    spin_unlock(&p1->event_channel_lock);
+    spin_unlock(&d1->event_channel_lock);
 
     return rc;
 }
@@ -328,46 +326,46 @@ static long __evtchn_close(struct domain *p1, int port1)
 
 static long evtchn_close(evtchn_close_t *close)
 {
-    struct domain *p;
-    long                rc;
-    domid_t             dom = close->dom;
+    struct domain *d;
+    long           rc;
+    domid_t        dom = close->dom;
 
     if ( dom == DOMID_SELF )
         dom = current->domain;
     else if ( !IS_PRIV(current) )
         return -EPERM;
 
-    if ( (p = find_domain_by_id(dom)) == NULL )
+    if ( (d = find_domain_by_id(dom)) == NULL )
         return -ESRCH;
 
-    rc = __evtchn_close(p, close->port);
+    rc = __evtchn_close(d, close->port);
 
-    put_domain(p);
+    put_domain(d);
     return rc;
 }
 
 
 static long evtchn_send(int lport)
 {
-    struct domain *lp = current, *rp;
-    int                 rport;
+    struct domain *ld = current, *rd;
+    int            rport;
 
-    spin_lock(&lp->event_channel_lock);
+    spin_lock(&ld->event_channel_lock);
 
     if ( unlikely(lport < 0) ||
-         unlikely(lport >= lp->max_event_channel) || 
-         unlikely(lp->event_channel[lport].state != ECS_INTERDOMAIN) )
+         unlikely(lport >= ld->max_event_channel) || 
+         unlikely(ld->event_channel[lport].state != ECS_INTERDOMAIN) )
     {
-        spin_unlock(&lp->event_channel_lock);
+        spin_unlock(&ld->event_channel_lock);
         return -EINVAL;
     }
 
-    rp    = lp->event_channel[lport].u.remote.dom;
-    rport = lp->event_channel[lport].u.remote.port;
+    rd    = ld->event_channel[lport].u.remote.dom;
+    rport = ld->event_channel[lport].u.remote.port;
 
-    evtchn_set_pending(rp, rport);
+    evtchn_set_pending(rd, rport);
 
-    spin_unlock(&lp->event_channel_lock);
+    spin_unlock(&ld->event_channel_lock);
 
     return 0;
 }
@@ -375,25 +373,25 @@ static long evtchn_send(int lport)
 
 static long evtchn_status(evtchn_status_t *status)
 {
-    struct domain *p;
-    domid_t             dom = status->dom;
-    int                 port = status->port;
-    event_channel_t    *chn;
-    long                rc = 0;
+    struct domain   *d;
+    domid_t          dom = status->dom;
+    int              port = status->port;
+    event_channel_t *chn;
+    long             rc = 0;
 
     if ( dom == DOMID_SELF )
         dom = current->domain;
     else if ( !IS_PRIV(current) )
         return -EPERM;
 
-    if ( (p = find_domain_by_id(dom)) == NULL )
+    if ( (d = find_domain_by_id(dom)) == NULL )
         return -ESRCH;
 
-    spin_lock(&p->event_channel_lock);
+    spin_lock(&d->event_channel_lock);
 
-    chn = p->event_channel;
+    chn = d->event_channel;
 
-    if ( (port < 0) || (port >= p->max_event_channel) )
+    if ( (port < 0) || (port >= d->max_event_channel) )
     {
         rc = -EINVAL;
         goto out;
@@ -425,8 +423,8 @@ static long evtchn_status(evtchn_status_t *status)
     }
 
  out:
-    spin_unlock(&p->event_channel_lock);
-    put_domain(p);
+    spin_unlock(&d->event_channel_lock);
+    put_domain(d);
     return rc;
 }
 
@@ -482,28 +480,28 @@ long do_event_channel_op(evtchn_op_t *uop)
 }
 
 
-int init_event_channels(struct domain *p)
+int init_event_channels(struct domain *d)
 {
-    spin_lock_init(&p->event_channel_lock);
-    p->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t), 
+    spin_lock_init(&d->event_channel_lock);
+    d->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t), 
                                GFP_KERNEL);
-    if ( unlikely(p->event_channel == NULL) )
+    if ( unlikely(d->event_channel == NULL) )
         return -ENOMEM;
-    p->max_event_channel = INIT_EVENT_CHANNELS;
-    memset(p->event_channel, 0, INIT_EVENT_CHANNELS * sizeof(event_channel_t));
-    p->event_channel[0].state  = ECS_VIRQ;
-    p->event_channel[0].u.virq = VIRQ_MISDIRECT;
+    d->max_event_channel = INIT_EVENT_CHANNELS;
+    memset(d->event_channel, 0, INIT_EVENT_CHANNELS * sizeof(event_channel_t));
+    d->event_channel[0].state  = ECS_VIRQ;
+    d->event_channel[0].u.virq = VIRQ_MISDIRECT;
     return 0;
 }
 
 
-void destroy_event_channels(struct domain *p)
+void destroy_event_channels(struct domain *d)
 {
     int i;
-    if ( p->event_channel != NULL )
+    if ( d->event_channel != NULL )
     {
-        for ( i = 0; i < p->max_event_channel; i++ )
-            (void)__evtchn_close(p, i);
-        kfree(p->event_channel);
+        for ( i = 0; i < d->max_event_channel; i++ )
+            (void)__evtchn_close(d, i);
+        kfree(d->event_channel);
     }
 }
index c0ad2d9b6e196201e61db1054e11460029e78d90..2b6d1ec021b967de580902a9b2ade830d7562aad 100644 (file)
@@ -16,7 +16,6 @@
 #include <xen/sched.h>
 #include <xen/mm.h>
 #include <xen/delay.h>
-#include <xen/interrupt.h>
 #include <xen/compile.h>
 #include <xen/console.h>
 #include <xen/serial.h>
@@ -287,8 +286,8 @@ void cmain(unsigned long magic, multiboot_info_t *mbi)
 
     init_trace_bufs();
 
-    domain_controller_unpause(current);
-    domain_controller_unpause(new_dom);
+    domain_start(current);
+    domain_start(new_dom);
     startup_cpu_idle_loop();
 }
 
index 6a6b82d08594b0d91dbcbcc1e883c730e951500a..162660326669a72ac0ebb77f060acb2df43339e0 100644 (file)
@@ -84,7 +84,8 @@ void do_task_queues(u_char key, void *dev_id, struct pt_regs *regs)
     for_each_domain ( p )
     {
         printk("Xen: DOM %u, CPU %d [has=%c]\n",
-               p->domain, p->processor, p->has_cpu ? 'T':'F'); 
+               p->domain, p->processor, 
+               test_bit(DF_RUNNING, &p->flags) ? 'T':'F'); 
         s = p->shared_info; 
         printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n", 
                s->vcpu_data[0].evtchn_upcall_pending, 
index cb5658dc0be2a9e187751840ef5e7f02f94c7002..088f82d2ff39fdfa399bbde5b9a786d31b4c3cf2 100644 (file)
 #include <xen/sched.h>
 #include <xen/errno.h>
 #include <xen/perfc.h>
-#include <xen/interrupt.h>
+#include <xen/irq.h>
 #include <xen/shadow.h>
 #include <asm/page.h>
 #include <asm/flushtlb.h>
index 243c06da2e5f55bebfb648b8970ecdafe48c1130..26295f3693237fb6b56f3b7b9e6b843c3c03e8e5 100644 (file)
@@ -22,7 +22,6 @@
 #include <xen/event.h>
 #include <xen/time.h>
 #include <xen/ac_timer.h>
-#include <xen/interrupt.h>
 #include <xen/perfc.h>
 #include <xen/sched-if.h>
 #include <xen/slab.h>
index 14743a27840a7534a8dc69079a82713d1817c68e..cfae3bfedcc178a1abf46d5de2e545a15f45d4ec 100644 (file)
 #include <xen/event.h>
 #include <xen/time.h>
 #include <xen/ac_timer.h>
-#include <xen/interrupt.h>
 #include <xen/perfc.h>
 #include <xen/sched-if.h>
-#include <hypervisor-ifs/sched_ctl.h>
+#include <xen/softirq.h>
 #include <xen/trace.h>
+#include <hypervisor-ifs/sched_ctl.h>
 
-/*#define WAKEUP_HISTO*/
+/*#define WAKE_HISTO*/
 /*#define BLOCKTIME_HISTO*/
 
-#if defined(WAKEUP_HISTO)
+#if defined(WAKE_HISTO)
 #define BUCKETS 31
 #elif defined(BLOCKTIME_HISTO)
 #define BUCKETS 200
@@ -100,83 +100,104 @@ static struct ac_timer fallback_timer[NR_CPUS];
 
 extern kmem_cache_t *domain_struct_cachep;
 
-void free_domain_struct(struct domain *p)
+void free_domain_struct(struct domain *d)
 {
-    SCHED_OP(free_task, p);
-    kmem_cache_free(domain_struct_cachep, p);
+    SCHED_OP(free_task, d);
+    kmem_cache_free(domain_struct_cachep, d);
 }
 
 struct domain *alloc_domain_struct(void)
 {
-    struct domain *p;
+    struct domain *d;
 
-    if ( (p = kmem_cache_alloc(domain_struct_cachep,GFP_KERNEL)) == NULL )
+    if ( (d = kmem_cache_alloc(domain_struct_cachep,GFP_KERNEL)) == NULL )
         return NULL;
     
-    memset(p, 0, sizeof(*p));
+    memset(d, 0, sizeof(*d));
 
-    if ( SCHED_OP(alloc_task, p) < 0 )
+    if ( SCHED_OP(alloc_task, d) < 0 )
     {
-        kmem_cache_free(domain_struct_cachep,p);
+        kmem_cache_free(domain_struct_cachep, d);
         return NULL;
     }
 
-    return p;
+    return d;
 }
 
 /*
  * Add and remove a domain
  */
-void sched_add_domain(struct domain *p)
+void sched_add_domain(struct domain *d)
 {
-    domain_controller_pause(p);
+    set_bit(DF_STOPPED, &d->flags);
 
-    if ( p->domain != IDLE_DOMAIN_ID )
+    if ( d->domain != IDLE_DOMAIN_ID )
     {
         /* Initialise the per-domain timer. */
-        init_ac_timer(&p->timer);
-        p->timer.cpu      =  p->processor;
-        p->timer.data     = (unsigned long)p;
-        p->timer.function = &dom_timer_fn;
+        init_ac_timer(&d->timer);
+        d->timer.cpu      = d->processor;
+        d->timer.data     = (unsigned long)d;
+        d->timer.function = &dom_timer_fn;
     }
     else
     {
-        schedule_data[p->processor].idle = p;
+        schedule_data[d->processor].idle = d;
     }
 
-    SCHED_OP(add_task, p);
+    SCHED_OP(add_task, d);
 
-    TRACE_3D(TRC_SCHED_DOM_ADD, _HIGH32(p->domain), _LOW32(p->domain), p);
+    TRACE_3D(TRC_SCHED_DOM_ADD, _HIGH32(d->domain), _LOW32(d->domain), d);
 }
 
-void sched_rem_domain(struct domain *p)
+void sched_rem_domain(struct domain *d)
 {
-    rem_ac_timer(&p->timer);
-    SCHED_OP(rem_task, p);
-    TRACE_3D(TRC_SCHED_DOM_REM, _HIGH32(p->domain), _LOW32(p->domain), p);
+    rem_ac_timer(&d->timer);
+    SCHED_OP(rem_task, d);
+    TRACE_3D(TRC_SCHED_DOM_REM, _HIGH32(d->domain), _LOW32(d->domain), d);
 }
 
 void init_idle_task(void)
 {
     unsigned long flags;
-    struct domain *p = current;
+    struct domain *d = current;
 
-    if ( SCHED_OP(alloc_task, p) < 0)
+    if ( SCHED_OP(alloc_task, d) < 0)
         panic("Failed to allocate scheduler private data for idle task");
-    SCHED_OP(add_task, p);
+    SCHED_OP(add_task, d);
+
+    spin_lock_irqsave(&schedule_lock[d->processor], flags);
+    set_bit(DF_RUNNING, &d->flags);
+    if ( !__task_on_runqueue(d) )
+        __add_to_runqueue_head(d);
+    spin_unlock_irqrestore(&schedule_lock[d->processor], flags);
+}
+
+void domain_sleep(struct domain *d)
+{
+    unsigned long flags;
+    int           cpu = d->processor;
+
+    spin_lock_irqsave(&schedule_lock[cpu], flags);
+
+    if ( test_bit(DF_RUNNING, &d->flags) )
+        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+    else if ( __task_on_runqueue(d) )
+        __del_from_runqueue(d);
+
+    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
 
-    spin_lock_irqsave(&schedule_lock[p->processor], flags);
-    p->has_cpu = 1;
-    if ( !__task_on_runqueue(p) )
-        __add_to_runqueue_head(p);
-    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+    /* Synchronous. */
+    while ( test_bit(DF_RUNNING, &d->flags) )
+    {
+        smp_mb();
+        cpu_relax();
+    }
 }
 
-/* Returns TRUE if the domain was actually woken up. */
-int domain_wakeup(struct domain *d)
+void domain_wake(struct domain *d)
 {
     unsigned long       flags;
-    int                 cpu = d->processor, woken_up = 0;
+    int                 cpu = d->processor;
     struct domain      *curr;
     s_time_t            now, min_time;
 
@@ -184,17 +205,12 @@ int domain_wakeup(struct domain *d)
 
     if ( likely(domain_runnable(d)) && likely(!__task_on_runqueue(d)) )
     {
-        woken_up = 1;
-
         TRACE_3D(TRC_SCHED_WAKE, _HIGH32(d->domain), _LOW32(d->domain), d);
         SCHED_OP(wake_up, d);
-#ifdef WAKEUP_HISTO
+#ifdef WAKE_HISTO
         p->wokenup = NOW();
 #endif
 
-        ASSERT(__task_on_runqueue(d));
-        ASSERT(!d->has_cpu);
-
         now = NOW();
         curr = schedule_data[cpu].curr;
 
@@ -208,34 +224,8 @@ int domain_wakeup(struct domain *d)
     }
 
     spin_unlock_irqrestore(&schedule_lock[cpu], flags);
-
-    return woken_up;
 }
 
-
-void __domain_pause(struct domain *d)
-{
-    unsigned long flags;
-    int           cpu = d->processor;
-
-    spin_lock_irqsave(&schedule_lock[cpu], flags);
-
-    if ( d->has_cpu )
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
-    else if ( __task_on_runqueue(d) )
-        __del_from_runqueue(d);
-
-    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
-
-    /* Synchronous. */
-    while ( d->has_cpu )
-    {
-        smp_mb();
-        cpu_relax();
-    }
-}
-
-
 /* Block the currently-executing domain until a pertinent event occurs. */
 long do_block(void)
 {
@@ -247,9 +237,7 @@ long do_block(void)
     return 0;
 }
 
-/*
- * Voluntarily yield the processor for this allocation.
- */
+/* Voluntarily yield the processor for this allocation. */
 static long do_yield(void)
 {
     TRACE_2D(TRC_SCHED_YIELD, current->domain, current);
@@ -394,9 +382,6 @@ void __enter_scheduler(void)
     r_time = next_slice.time;
     next = next_slice.task;
 
-    prev->has_cpu = 0;
-    next->has_cpu = 1;
-
     schedule_data[cpu].curr = next;
 
     next->lastschd = now;
@@ -416,7 +401,7 @@ void __enter_scheduler(void)
     
     perfc_incrc(sched_ctx);
 
-#if defined(WAKEUP_HISTO)
+#if defined(WAKE_HISTO)
     if ( !is_idle_task(next) && next->wokenup ) {
         ulong diff = (ulong)(now - next->wokenup);
         diff /= (ulong)MILLISECS(1);
@@ -437,7 +422,16 @@ void __enter_scheduler(void)
     TRACE_2D(TRC_SCHED_SWITCH, next->domain, next);
 
     switch_to(prev, next);
-    
+
+    /*
+     * We do this late on because it doesn't need to be protected by the
+     * schedule_lock, and because we want this to be the very last use of
+     * 'prev' (after this point, a dying domain's info structure may be freed
+     * without warning). 
+     */
+    clear_bit(DF_RUNNING, &prev->flags);
+    set_bit(DF_RUNNING, &next->flags);
+
     /* Mark a timer event for the newly-scheduled domain. */
     if ( !is_idle_task(next) )
         send_guest_virq(next, VIRQ_TIMER);
@@ -578,56 +572,61 @@ static void dump_rqueue(struct list_head *queue, char *name)
 {
     struct list_head *list;
     int loop = 0;
-    struct domain  *p;
-
-    printk ("QUEUE %s %lx   n: %lx, p: %lx\n", name,  (unsigned long)queue,
-            (unsigned long) queue->next, (unsigned long) queue->prev);
-    list_for_each (list, queue) {
-        p = list_entry(list, struct domain, run_list);
-        printk("%3d: %u has=%c ", loop++, p->domain, p->has_cpu ? 'T':'F');
-        SCHED_OP(dump_runq_el, p);
-        printk("c=0x%X%08X\n", (u32)(p->cpu_time>>32), (u32)p->cpu_time);
+    struct domain *d;
+
+    printk("QUEUE %s %lx   n: %lx, p: %lx\n", name,  (unsigned long)queue,
+           (unsigned long) queue->next, (unsigned long) queue->prev);
+
+    list_for_each ( list, queue )
+    {
+        d = list_entry(list, struct domain, run_list);
+        printk("%3d: %u has=%c ", loop++, d->domain, 
+               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+        SCHED_OP(dump_runq_el, d);
+        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
         printk("         l: %lx n: %lx  p: %lx\n",
                (unsigned long)list, (unsigned long)list->next,
                (unsigned long)list->prev);
     }
-    return; 
 }
 
 void dump_runq(u_char key, void *dev_id, struct pt_regs *regs)
 {
-    u_long   flags; 
-    s_time_t now = NOW();
-    int i;
+    unsigned long flags; 
+    s_time_t      now = NOW();
+    int           i;
 
     printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
     SCHED_OP(dump_settings);
     printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now); 
-    for (i = 0; i < smp_num_cpus; i++) {
+    for ( i = 0; i < smp_num_cpus; i++ )
+    {
         spin_lock_irqsave(&schedule_lock[i], flags);
         printk("CPU[%02d] ", i);
         SCHED_OP(dump_cpu_state,i);
         dump_rqueue(&schedule_data[i].runqueue, "rq"); 
         spin_unlock_irqrestore(&schedule_lock[i], flags);
     }
-    return; 
 }
 
-#if defined(WAKEUP_HISTO) || defined(BLOCKTIME_HISTO)
+#if defined(WAKE_HISTO) || defined(BLOCKTIME_HISTO)
 void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
 {
-    int loop, i, j;
-    for (loop = 0; loop < smp_num_cpus; loop++) {
+    int i, j, k;
+    for ( k = 0; k < smp_num_cpus; k++ )
+    {
         j = 0;
-        printf ("CPU[%02d]: scheduler latency histogram (ms:[count])\n", loop);
-        for (i=0; i<BUCKETS; i++) {
-            if (schedule_data[loop].hist[i]) {
-                if (i < BUCKETS-1)
-                    printk("%2d:[%7u]    ", i, schedule_data[loop].hist[i]);
+        printf ("CPU[%02d]: scheduler latency histogram (ms:[count])\n", k);
+        for ( i = 0; i < BUCKETS; i++ )
+        {
+            if ( schedule_data[k].hist[i] != 0 )
+            {
+                if ( i < BUCKETS-1 )
+                    printk("%2d:[%7u]    ", i, schedule_data[k].hist[i]);
                 else
-                    printk(" >:[%7u]    ", schedule_data[loop].hist[i]);
-                j++;
-                if (!(j % 5)) printk("\n");
+                    printk(" >:[%7u]    ", schedule_data[k].hist[i]);
+                if ( !(++j % 5) )
+                    printk("\n");
             }
         }
         printk("\n");
@@ -636,10 +635,10 @@ void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
 }
 void reset_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
 {
-    int loop, i;
-    for (loop = 0; loop < smp_num_cpus; loop++)
-        for (i=0; i<BUCKETS; i++)
-            schedule_data[loop].hist[i]=0;
+    int i, j;
+    for ( j = 0; j < smp_num_cpus; j++ )
+        for ( i = 0; i < BUCKETS; i++ )
+            schedule_data[j].hist[i] = 0;
 }
 #else
 void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
index 71d7cc8087a2c5708c36f1d4f527d81764dc7d9e..8936a6422de2c5bfef763828ac2561d2e1c8b2d4 100644 (file)
@@ -331,18 +331,18 @@ void shadow_mode_disable( struct domain *p )
     kfree( &m->shadow_ht[0] );
 }
 
-static int shadow_mode_table_op( struct domain *p,
-                                                                dom0_shadow_control_t *sc )
+static int shadow_mode_table_op(struct domain *d,
+                                                           dom0_shadow_control_t *sc)
 {
     unsigned int op = sc->op;
-    struct mm_struct *m = &p->mm;
+    struct mm_struct *m = &d->mm;
     int rc = 0;
 
     // since Dom0 did the hypercall, we should be running with it's page
     // tables right now. Calling flush on yourself would be really
     // stupid.
 
-    ASSERT(spin_is_locked(&p->mm.shadow_lock));
+    ASSERT(spin_is_locked(&d->mm.shadow_lock));
 
     if ( m == &current->mm )
     {
@@ -380,44 +380,44 @@ static int shadow_mode_table_op( struct domain *p,
                
        send_bitmap:
 
-               if( p->tot_pages > sc->pages || 
-                       !sc->dirty_bitmap || !p->mm.shadow_dirty_bitmap )
+               if( d->tot_pages > sc->pages || 
+                       !sc->dirty_bitmap || !d->mm.shadow_dirty_bitmap )
                {
                        rc = -EINVAL;
                        goto out;
                }
 
-               sc->fault_count = p->mm.shadow_fault_count;
-               sc->dirty_count = p->mm.shadow_dirty_count;
-               p->mm.shadow_fault_count = 0;
-               p->mm.shadow_dirty_count = 0;
+               sc->fault_count = d->mm.shadow_fault_count;
+               sc->dirty_count = d->mm.shadow_dirty_count;
+               d->mm.shadow_fault_count = 0;
+               d->mm.shadow_dirty_count = 0;
        
-               sc->pages = p->tot_pages;
+               sc->pages = d->tot_pages;
        
 #define chunk (8*1024) // do this in 1KB chunks for L1 cache
        
-               for(i=0;i<p->tot_pages;i+=chunk)
+               for(i=0;i<d->tot_pages;i+=chunk)
                {
-                       int bytes = ((  ((p->tot_pages-i) > (chunk))?
-                                                       (chunk):(p->tot_pages-i) ) + 7) / 8;
+                       int bytes = ((  ((d->tot_pages-i) > (chunk))?
+                                                       (chunk):(d->tot_pages-i) ) + 7) / 8;
            
                        copy_to_user( sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
-                                                 p->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
+                                                 d->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
                                                  bytes );
            
                        for(j=0; zero && j<bytes/sizeof(unsigned long);j++)
                        {
-                               if( p->mm.shadow_dirty_bitmap[j] != 0 )
+                               if( d->mm.shadow_dirty_bitmap[j] != 0 )
                                        zero = 0;
                        }
 
-                       memset( p->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
+                       memset( d->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
                                        0, bytes);
                }
 
         /* Might as well stop the domain as an optimization. */
                if ( zero )
-            domain_controller_pause(p);
+            domain_stop(d);
 
                break;
     }
@@ -426,24 +426,24 @@ static int shadow_mode_table_op( struct domain *p,
     {
                int i;
        
-               if( p->tot_pages > sc->pages || 
-                       !sc->dirty_bitmap || !p->mm.shadow_dirty_bitmap )
+               if( d->tot_pages > sc->pages || 
+                       !sc->dirty_bitmap || !d->mm.shadow_dirty_bitmap )
                {
                        rc = -EINVAL;
                        goto out;
                }
        
-               sc->pages = p->tot_pages;
+               sc->pages = d->tot_pages;
        
 #define chunk (8*1024) // do this in 1KB chunks for L1 cache
        
-               for(i=0;i<p->tot_pages;i+=chunk)
+               for(i=0;i<d->tot_pages;i+=chunk)
                {
-                       int bytes = ((  ((p->tot_pages-i) > (chunk))?
-                                                       (chunk):(p->tot_pages-i) ) + 7) / 8;
+                       int bytes = ((  ((d->tot_pages-i) > (chunk))?
+                                                       (chunk):(d->tot_pages-i) ) + 7) / 8;
            
                        copy_to_user( sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
-                                                 p->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
+                                                 d->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
                                                  bytes );          
                }
 
index 63d2c8d859a6b74521d1783e9797ba7ddaa9e328..3e1472e94abf4a5138968ee07ce9fccf9582da99 100644 (file)
  */
 
 #include <xen/config.h>
+#include <xen/init.h>
 #include <xen/mm.h>
 #include <xen/sched.h>
-#include <xen/interrupt.h>
-#include <xen/init.h>
+#include <xen/softirq.h>
 
 irq_cpustat_t irq_stat[NR_CPUS];
 
@@ -37,20 +37,6 @@ asmlinkage void do_softirq()
     }
 }
 
-inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
-{
-    __cpu_raise_softirq(cpu, nr);
-#ifdef CONFIG_SMP
-    if ( cpu != smp_processor_id() )
-        smp_send_event_check_cpu(cpu);
-#endif
-}
-
-void raise_softirq(unsigned int nr)
-{
-    __cpu_raise_softirq(smp_processor_id(), nr);
-}
-
 void open_softirq(int nr, softirq_handler handler)
 {
     softirq_handlers[nr] = handler;
index 499f852fdae9004655c3e5cd0bbde89ad5296882..e6ae6c60b9f282f1c320403093b8492446a8f3f5 100644 (file)
@@ -8,14 +8,14 @@
  * Copyright (c) 2003-2004, K A Fraser
  */
 
-#include <asm/io.h>
-#include <xen/sched.h>
+#include <xen/config.h>
+#include <xen/irq.h>
 #include <xen/keyhandler.h> 
+#include <asm/pdb.h>
 #include <xen/reboot.h>
-#include <xen/interrupt.h>
-#include <xen/irq.h>
+#include <xen/sched.h>
 #include <xen/serial.h>
-#include <asm/pdb.h>
+#include <asm/io.h>
 
 /* Register offsets */
 #define RBR             0x00    /* receive buffer       */
index 95c72010a162d64b940fe08605c8d745b82588e4..0393215493229d540fb23ae87e8346bc1a4b340b 100644 (file)
  */
 
 /* Schedule an asynchronous callback for the specified domain. */
-static inline void guest_async_callback(struct domain *p)
+static inline void guest_async_callback(struct domain *d)
 {
-    if ( !domain_unblock(p) && p->has_cpu && (p != current) )
-        smp_send_event_check_mask(1 << p->processor);
+    int running = test_bit(DF_RUNNING, &d->flags);
+    domain_unblock(d);
+    if ( running )
+        smp_send_event_check_cpu(d->processor);
 }
 
 /*
@@ -31,43 +33,43 @@ static inline void guest_async_callback(struct domain *p)
  * may require explicit memory barriers.
  */
 
-static inline void evtchn_set_pending(struct domain *p, int port)
+static inline void evtchn_set_pending(struct domain *d, int port)
 {
-    shared_info_t *s = p->shared_info;
+    shared_info_t *s = d->shared_info;
     if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
          !test_bit        (port,    &s->evtchn_mask[0])    &&
          !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
     {
         /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
         s->vcpu_data[0].evtchn_upcall_pending = 1;
-        guest_async_callback(p);
+        guest_async_callback(d);
     }
 }
 
-static inline void evtchn_set_exception(struct domain *p, int port)
+static inline void evtchn_set_exception(struct domain *d, int port)
 {
-    if ( !test_and_set_bit(port, &p->shared_info->evtchn_exception[0]) )
-        evtchn_set_pending(p, port);
+    if ( !test_and_set_bit(port, &d->shared_info->evtchn_exception[0]) )
+        evtchn_set_pending(d, port);
 }
 
 /*
  * send_guest_virq:
- *  @p:        Domain to which virtual IRQ should be sent
+ *  @d:        Domain to which virtual IRQ should be sent
  *  @virq:     Virtual IRQ number (VIRQ_*)
  */
-static inline void send_guest_virq(struct domain *p, int virq)
+static inline void send_guest_virq(struct domain *d, int virq)
 {
-    evtchn_set_pending(p, p->virq_to_evtchn[virq]);
+    evtchn_set_pending(d, d->virq_to_evtchn[virq]);
 }
 
 /*
  * send_guest_pirq:
- *  @p:        Domain to which physical IRQ should be sent
+ *  @d:        Domain to which physical IRQ should be sent
  *  @pirq:     Physical IRQ number
  */
-static inline void send_guest_pirq(struct domain *p, int pirq)
+static inline void send_guest_pirq(struct domain *d, int pirq)
 {
-    evtchn_set_pending(p, p->pirq_to_evtchn[pirq]);
+    evtchn_set_pending(d, d->pirq_to_evtchn[pirq]);
 }
 
 #define event_pending(_d)                                     \
diff --git a/xen/include/xen/interrupt.h b/xen/include/xen/interrupt.h
deleted file mode 100644 (file)
index 0e69ba8..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _LINUX_INTERRUPT_H
-#define _LINUX_INTERRUPT_H
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <xen/smp.h>
-#include <xen/cache.h>
-
-#include <asm/bitops.h>
-#include <asm/atomic.h>
-#include <asm/ptrace.h>
-
-struct irqaction
-{
-    void (*handler)(int, void *, struct pt_regs *);
-    const char *name;
-    void *dev_id;
-};
-
-#include <asm/hardirq.h>
-
-enum
-{
-    AC_TIMER_SOFTIRQ=0,
-    NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
-    SCHEDULE_SOFTIRQ, /* NB. This must come last or do_softirq() will break! */
-    NR_SOFTIRQS
-};
-
-typedef void (*softirq_handler)(void);
-
-asmlinkage void do_softirq(void);
-extern void open_softirq(int nr, softirq_handler handler);
-#define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))
-extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
-extern void FASTCALL(raise_softirq(unsigned int nr));
-
-#endif
index 51f958806712829fa40ac4e70d332ebef17cf91e..1cf5a50021b46fb3e94b14e4cf4694a5731bad8a 100644 (file)
@@ -4,6 +4,14 @@
 #include <xen/config.h>
 #include <xen/spinlock.h>
 #include <asm/ptrace.h>
+#include <asm/hardirq.h>
+
+struct irqaction
+{
+    void (*handler)(int, void *, struct pt_regs *);
+    const char *name;
+    void *dev_id;
+};
 
 /*
  * IRQ line status.
index db54da026a0c752442604168fbc16e32ea085410..4fc3409da8cefc763e7ae0b39a296d1e88f92983 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _LINUX_SCHED_H
-#define _LINUX_SCHED_H
+#ifndef __SCHED_H__
+#define __SCHED_H__
 
 #include <xen/config.h>
 #include <xen/types.h>
@@ -46,8 +46,8 @@ typedef struct event_channel_st
     } u;
 } event_channel_t;
 
-int  init_event_channels(struct domain *p);
-void destroy_event_channels(struct domain *p);
+int  init_event_channels(struct domain *d);
+void destroy_event_channels(struct domain *d);
 
 struct domain 
 {
@@ -85,7 +85,9 @@ struct domain
      * From here on things can be added and shuffled without special attention
      */
 
-    domid_t domain;
+    domid_t  domain;
+    char     name[MAX_DOMAIN_NAME];
+    s_time_t create_time;
 
     spinlock_t       page_list_lock;
     struct list_head page_list;
@@ -94,26 +96,19 @@ struct domain
 
     /* Scheduling. */
     struct list_head run_list;
-    int              has_cpu;
     int              stop_code;     /* stop code from OS (if DF_STOPPED). */
-    int              cpupinned;     /* true if pinned to curent CPU */
     s_time_t         lastschd;      /* time this domain was last scheduled */
     s_time_t         lastdeschd;    /* time this domain was last descheduled */
     s_time_t         cpu_time;      /* total CPU time received till now */
     s_time_t         wokenup;       /* time domain got woken up */
     struct ac_timer  timer;         /* one-shot timer for timeout values */
-
     s_time_t         min_slice;     /* minimum time before reschedule */
-
-    void *sched_priv;               /* scheduler-specific data */
+    void            *sched_priv;    /* scheduler-specific data */
 
     struct mm_struct mm;
 
     mm_segment_t addr_limit;
 
-    char name[MAX_DOMAIN_NAME];
-    s_time_t create_time;
-
     struct thread_struct thread;
     struct domain *next_list, *next_hash;
 
@@ -156,7 +151,6 @@ struct domain
 {                                \
     processor:   0,              \
     domain:      IDLE_DOMAIN_ID, \
-    has_cpu:     0,              \
     mm:          IDLE0_MM,       \
     addr_limit:  KERNEL_DS,      \
     thread:      INIT_THREAD,    \
@@ -172,7 +166,7 @@ extern struct domain *idle_task[NR_CPUS];
 
 #include <xen/slab.h>
 
-void free_domain_struct(struct domain *p);
+void free_domain_struct(struct domain *d);
 struct domain *alloc_domain_struct();
 
 #define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
@@ -186,13 +180,13 @@ static inline int get_domain(struct domain *d)
   
 extern struct domain *do_createdomain(
     domid_t dom_id, unsigned int cpu);
-extern int construct_dom0(struct domain *p,
+extern int construct_dom0(struct domain *d,
                           unsigned long alloc_start,
                           unsigned long alloc_end,
                           char *image_start, unsigned long image_len, 
                           char *initrd_start, unsigned long initrd_len,
                           char *cmdline);
-extern int final_setup_guestos(struct domain *p, dom0_builddomain_t *);
+extern int final_setup_guestos(struct domain *d, dom0_builddomain_t *);
 
 struct domain *find_domain_by_id(domid_t dom);
 struct domain *find_last_domain(void);
@@ -202,7 +196,7 @@ extern void domain_crash(void);
 extern void domain_suspend(u8 reason);
 
 /* arch/process.c */
-void new_thread(struct domain *p,
+void new_thread(struct domain *d,
                 unsigned long start_pc,
                 unsigned long start_stack,
                 unsigned long start_info);
@@ -218,14 +212,14 @@ extern spinlock_t schedule_lock[NR_CPUS] __cacheline_aligned;
 #define set_current_state(_s) do { current->state = (_s); } while (0)
 void scheduler_init(void);
 void schedulers_start(void);
-void sched_add_domain(struct domain *p);
-void sched_rem_domain(struct domain *p);
+void sched_add_domain(struct domain *d);
+void sched_rem_domain(struct domain *d);
 long sched_ctl(struct sched_ctl_cmd *);
 long sched_adjdom(struct sched_adjdom_cmd *);
 int  sched_id();
 void init_idle_task(void);
-int domain_wakeup(struct domain *p);
-void __domain_pause(struct domain *p);
+void domain_wake(struct domain *d);
+void domain_sleep(struct domain *d);
 
 void __enter_scheduler(void);
 
@@ -259,61 +253,65 @@ extern struct domain *task_list;
 #define DF_PRIVILEGED   5 /* Is this domain privileged?                     */
 #define DF_CONSOLEWRITEBUG 6 /* Has this domain used the obsolete console?  */
 #define DF_PHYSDEV      7 /* May this domain do IO to physical devices?     */
-
 #define DF_BLOCKED      8 /* Domain is blocked waiting for an event.        */
-#define DF_CONTROLPAUSE 9 /* Domain is paused by control software.          */
+#define DF_STOPPED      9 /* Domain is stopped by control software.          */
 #define DF_SUSPENDED   10 /* Guest suspended its execution for some reason. */
 #define DF_CRASHED     11 /* Domain crashed inside Xen, cannot continue.    */
 #define DF_DYING       12 /* Death rattle.                                  */
+#define DF_RUNNING     13 /* Currently running on a CPU.                    */
+#define DF_CPUPINNED   14 /* Disables auto-migration.                       */
 
-static inline int domain_runnable(struct domain *p)
+static inline int domain_runnable(struct domain *d)
 {
-    return ( (atomic_read(&p->pausecnt) == 0) &&
-             !(p->flags & ((1<<DF_BLOCKED)|(1<<DF_CONTROLPAUSE)|
-                           (1<<DF_SUSPENDED)|(1<<DF_CRASHED)|(1<<DF_DYING))) );
+    return ( (atomic_read(&d->pausecnt) == 0) &&
+             !(d->flags & ((1<<DF_BLOCKED)|(1<<DF_STOPPED)|
+                           (1<<DF_SUSPENDED)|(1<<DF_CRASHED))) );
 }
 
-/* Returns TRUE if the domain was actually unblocked and woken. */
-static inline int domain_unblock(struct domain *d)
+static inline void domain_pause(struct domain *d)
 {
-    if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
-        return domain_wakeup(d);
-    return 0;
+    ASSERT(d != current);
+    atomic_inc(&d->pausecnt);
+    domain_sleep(d);
 }
 
-static inline void domain_unsuspend(struct domain *d)
+static inline void domain_unpause(struct domain *d)
 {
-    if ( test_and_clear_bit(DF_SUSPENDED, &d->flags) )
-        (void)domain_wakeup(d);
+    ASSERT(d != current);
+    if ( atomic_dec_and_test(&d->pausecnt) )
+        domain_wake(d);
 }
 
-static inline void domain_controller_pause(struct domain *d)
+static inline void domain_unblock(struct domain *d)
 {
-    if ( !test_and_set_bit(DF_CONTROLPAUSE, &d->flags) )
-        __domain_pause(d);
+    ASSERT(d != current);
+    if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
+        domain_wake(d);
 }
 
-static inline void domain_controller_unpause(struct domain *d)
+static inline void domain_unsuspend(struct domain *d)
 {
-    if ( test_and_clear_bit(DF_CONTROLPAUSE, &d->flags) )
-        (void)domain_wakeup(d);
+    ASSERT(d != current);
+    if ( test_and_clear_bit(DF_SUSPENDED, &d->flags) )
+        domain_wake(d);
 }
 
-static inline void domain_pause(struct domain *d)
+static inline void domain_stop(struct domain *d)
 {
-    if ( d == current ) BUG();
-    atomic_inc(&d->pausecnt);
-    __domain_pause(d);
+    ASSERT(d != current);
+    if ( !test_and_set_bit(DF_STOPPED, &d->flags) )
+        domain_sleep(d);
 }
 
-static inline void domain_unpause(struct domain *d)
+static inline void domain_start(struct domain *d)
 {
-    if ( atomic_dec_and_test(&d->pausecnt) )
-        (void)domain_wakeup(d);
+    ASSERT(d != current);
+    if ( test_and_clear_bit(DF_STOPPED, &d->flags) )
+        domain_wake(d);
 }
 
 
-#define IS_PRIV(_p) (test_bit(DF_PRIVILEGED, &(_p)->flags))
-#define IS_CAPABLE_PHYSDEV(_p) (test_bit(DF_PHYSDEV, &(_p)->flags))
+#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
+#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))
 
-#endif /*_LINUX_SCHED_H */
+#endif /* __SCHED_H__ */
index 2cb88183a904bd256f53bdd850059a7d175d4c3a..b0f39112ced3d26b1b66a58c738624b7c89ba40b 100644 (file)
@@ -25,7 +25,6 @@ extern void smp_send_stop(void);
 extern void FASTCALL(smp_send_event_check_mask(unsigned long cpu_mask));
 #define smp_send_event_check_cpu(_cpu) smp_send_event_check_mask(1<<(_cpu))
 
-
 /*
  * Boot processor call to load the other CPU's
  */
diff --git a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h
new file mode 100644 (file)
index 0000000..619c86b
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef __XEN_SOFTIRQ_H__
+#define __XEN_SOFTIRQ_H__
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/smp.h>
+#include <asm/bitops.h>
+#include <asm/hardirq.h>
+
+enum
+{
+    AC_TIMER_SOFTIRQ=0,
+    NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
+    SCHEDULE_SOFTIRQ, /* NB. This must come last or do_softirq() will break! */
+    NR_SOFTIRQS
+};
+
+typedef void (*softirq_handler)(void);
+
+asmlinkage void do_softirq(void);
+extern void open_softirq(int nr, softirq_handler handler);
+
+static inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
+{
+    if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
+        smp_send_event_check_cpu(cpu);
+}
+
+static inline void raise_softirq(unsigned int nr)
+{
+    set_bit(nr, &softirq_pending(smp_processor_id()));
+}
+
+#endif /* __XEN_SOFTIRQ_H__ */